Changing Traditional ID-based Entry Systems at Cornell University
A Project By Atahan Aksaray and Yen An Lu
We developed a facial recognition access system prototype using a Raspberry Pi. The project involved writing Python scripts for facial recognition and user-interface control, constructing a 3D-printed door with a solenoid lock, and building additional small cardboard doors to demonstrate room-level access. The system verifies captured facial images against a database of authorized users and displays the resulting access permissions on a piTFT screen, so that only authorized users can enter.
The system comprised both hardware and software components:
For facial recognition, the Raspberry Pi Camera Module captured images, which were processed using OpenCV for facial detection and feature extraction. These features were compared against a database of authorized faces. Upon finding a match, the system proceeded to access verification.
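The training step that produces the trainer.yml model used by the recognition script is not included in the code appendix. The following is a minimal sketch of how such a model can be built with OpenCV's LBPH recognizer; the dataset folder and the User.<id>.<n>.jpg filename convention are illustrative assumptions, not taken from the project code, while the cascade path matches the one in the listing.
# Hedged training sketch (not part of the project listing): builds trainer/trainer.yml
# from face crops stored as dataset/User.<id>.<n>.jpg. Folder layout and filenames
# are assumptions for illustration only.
import os
import cv2
import numpy as np

dataset_path = '/home/pi/FaceRecognition/dataset'  # assumed location of captured face images
cascade_path = '/home/pi/FaceRecognition/haarcascade_frontalface_default.xml'
detector = cv2.CascadeClassifier(cascade_path)
recognizer = cv2.face.LBPHFaceRecognizer_create()

samples = []
ids = []
for filename in os.listdir(dataset_path):
    if not filename.endswith('.jpg'):
        continue
    # Assumed filename convention: User.<numeric id>.<sample number>.jpg
    face_id = int(filename.split('.')[1])
    gray = cv2.imread(os.path.join(dataset_path, filename), cv2.IMREAD_GRAYSCALE)
    for (x, y, w, h) in detector.detectMultiScale(gray):
        samples.append(gray[y:y + h, x:x + w])
        ids.append(face_id)

# Train the LBPH model and save it where the recognition script expects it
recognizer.train(samples, np.array(ids))
recognizer.write('/home/pi/FaceRecognition/trainer/trainer.yml')
print(" [INFO] Trained on {0} face samples".format(len(samples)))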
When access was granted, a signal was sent to the Raspberry Pi to activate the relay module, which energized the solenoid lock and unlocked the door. The piTFT screen displayed the user's name and a map of accessible rooms, showing access permissions visually.
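For reference, the front-door unlock amounts to a short GPIO pulse through the relay. The sketch below isolates that pulse; the pin number (GPIO 23) and the two-second hold follow the map_control.py listing in the code appendix, while unlock_front_door() is a hypothetical helper name used only for illustration.
import time
import RPi.GPIO as GPIO

RELAY_PIN = 23  # GPIO pin driving the relay module (as in map_control.py)

def unlock_front_door(hold_seconds=2):
    # Pulse the relay: energize the solenoid, hold, then let the lock re-engage
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(RELAY_PIN, GPIO.OUT)
    GPIO.output(RELAY_PIN, GPIO.HIGH)  # relay on -> solenoid retracts, door unlocked
    time.sleep(hold_seconds)           # keep the door unlocked briefly
    GPIO.output(RELAY_PIN, GPIO.LOW)   # relay off -> lock re-engages

if __name__ == '__main__':
    try:
        unlock_front_door()
    finally:
        GPIO.cleanup()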
Map Control:
3D Printing:
Component Assembly:
Component Testing:
Software Testing:
System Integration Testing:
Week 2:
Week 3:
Week 4:
Week 5:
Through these iterative design and testing steps, we verified that the system functioned as intended. The combination of hardware assembly, software development, and rigorous testing produced a reliable facial recognition access control prototype ready for demonstration.
Overall, the project met most of its goals and demonstrated the potential of a facial recognition access system. However, some functionalities require further development and refinement to improve accuracy and completeness.
The Face Recognition Door Unlock System project successfully built a prototype using Raspberry Pi and OpenCV to control door access with facial recognition. While core functionalities such as facial recognition, the user interface, and solenoid lock control were developed, challenges arose with data collection, which limited recognition accuracy, and with flickering on the piTFT screen. The administrative interface and website integration also remain incomplete. Despite these hurdles, the project made significant progress and demonstrated the potential for a robust facial recognition-based door access control system. Future work will focus on refining the facial recognition algorithm, addressing the screen flickering, and completing the outstanding functionalities.
aa2485@cornell.edu
Designed the 3D Parts, Additional Cardboard Doors, and the Map in map_control.py.
yl3726@cornell.edu
Implemented the Face Recognition Algorithm and Combined the Scripts to Work Correctly.
import cv2
import numpy as np
import os
# os.chdir("/home/pi/opencv-3.4.1/data/haarcascades")
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('/home/pi/FaceRecognition/trainer/trainer.yml')
cascadePath = "/home/pi/FaceRecognition/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX
# initialize id counter
id = 0
# names related to ids: example ==> KUNAL: id=1, etc
names = ['None', 'David', 'Atahan', 'Evan', 'Z', 'W']
# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640) # set video width
cam.set(4, 480) # set video height
# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)
while True:
    ret, img = cam.read()
    # img = cv2.flip(img, -1)  # Flip vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        id, confidence = recognizer.predict(gray[y:y+h, x:x+w])
        # Check if confidence is less than 100 ==> "0" is a perfect match
        if confidence < 100:
            print(confidence)
            id = names[id]
            confidence = " {0}%".format(round(100 - confidence))
        else:
            id = "unknown"
            confidence = " {0}%".format(round(100 - confidence))
        cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)
    cv2.imshow('camera', img)
    k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video loop
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
#-----------------------------------------------------------------------------------
import RPi.GPIO as GPIO
import pygame
import os
import time
# import numpy
# import pandas as pd
count1 = 0
count2 = 0
freq = 50
file_path = '/home/pi/FaceRecognition/current_id.txt'
# H-bridge driver used to swing the small room doors open and closed
class Motor():
    def __init__(self, in1_pin, in2_pin, pwm_pin, frequency=50):
        self.in1_pin = in1_pin
        self.in2_pin = in2_pin
        GPIO.setup(in1_pin, GPIO.OUT)
        GPIO.setup(in2_pin, GPIO.OUT)
        GPIO.setup(pwm_pin, GPIO.OUT)
        self.pwm = GPIO.PWM(pwm_pin, frequency)

    def set(self, in1_level, in2_level, dc):
        # Drive the two direction pins and start PWM at the given duty cycle
        GPIO.output(self.in1_pin, in1_level)
        GPIO.output(self.in2_pin, in2_level)
        self.pwm.start(dc)

    def start(self, dc, clockwise=True):
        if clockwise:
            self.set(GPIO.HIGH, GPIO.LOW, dc)
        else:
            self.set(GPIO.LOW, GPIO.HIGH, dc)

    def stop(self):
        self.set(GPIO.LOW, GPIO.LOW, 0)

    def cleanup(self):
        self.pwm.ChangeDutyCycle(0)
        self.pwm.stop()
# Environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV', '/dev/fb0')
os.putenv('SDL_MOUSEDRV','dummy')
os.putenv('SDL_MOUSEDEV','/dev/null')
os.putenv('DISPLAY','')
# Initialize Pygame
pygame.init()
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GRAY = (200, 200, 200)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
# Screen dimensions
size = width, height = 320, 240
lcd = pygame.display.set_mode(size)
pygame.mouse.set_visible(False)
# Room definitions (x, y, width, height)
rooms = {
    'R1': [(10, 40, 80, 60)],
    'R2': [(100, 40, 40, 60)],
    'R3': [(180, 40, 130, 60), (250, 100, 60, 70)],
    'R4': [(10, 140, 40, 30)],
    'R5': [(90, 140, 40, 30)],
    'R6': [(140, 140, 70, 90)],
    'R7': [(10, 180, 40, 50)],
    'R8': [(90, 180, 40, 50)],
    'R9': [(250, 180, 60, 50)]
}
# Function to draw the map
def draw_map(access_dict):
    # person_name is a module-level global set by the main loop before this is called
    lcd.fill(BLACK)
    font = pygame.font.Font(None, 30)
    text_surface = font.render(person_name, True, WHITE)
    text_x = (width - text_surface.get_width()) // 2
    lcd.blit(text_surface, (text_x, 10))
    # Color each room green (accessible) or red (not accessible)
    for room, rect in rooms.items():
        if access_dict.get(room, False):
            color = GREEN
        else:
            color = RED
        for rectangle in rect:
            pygame.draw.rect(lcd, color, rectangle)
    # Hallways are always drawn in gray
    road_dict = {'Road': [(10, 110, 230, 20), (60, 130, 20, 100), (150, 40, 20, 70), (220, 130, 20, 100)]}
    for road, rect in road_dict.items():
        for rectangle in rect:
            pygame.draw.rect(lcd, GRAY, rectangle)
    pygame.display.flip()
# Buttons
def quit_button(channel):
    global code_run
    code_run = False

def left_button(channel):
    # Open room 1's door if the current user has access and the button count has settled
    global person_name, access, count1
    count1 = count1 + 1
    if access['R1'] and (count1 > 2):
        print("Open room 1")
        print(count1)
        m1.start(50, True)
        time.sleep(0.25)
        m1.stop()
        time.sleep(2)
        # ccw: swing the door back closed
        m1.start(50, False)
        time.sleep(0.25)
        m1.stop()

def right_button(channel):
    # Open room 2's door if the current user has access and the button count has settled
    global person_name, access, count2
    count2 = count2 + 1
    if access['R2'] and (count2 > 2):
        print("Open room 2")
        print(count2)
        m2.start(50, True)
        time.sleep(0.25)
        m2.stop()
        time.sleep(2)
        # ccw: swing the door back closed
        m2.start(50, False)
        time.sleep(0.25)
        m2.stop()
# Set up GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(17, GPIO.FALLING, callback=left_button, bouncetime=2000)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(22, GPIO.FALLING, callback=right_button, bouncetime=2000)
# Initialize screen
lcd.fill(BLACK)
font = pygame.font.Font(None, 30)
text_surface = font.render("Waiting...", True, WHITE)
text_x = (width - text_surface.get_width()) // 2
lcd.blit(text_surface, (text_x, 10))
for room, rect in rooms.items():
    for rectangle in rect:
        pygame.draw.rect(lcd, GRAY, rectangle)
road_dict = {'Road': [(10, 110, 230, 20), (60, 130, 20, 100), (150, 40, 20, 70), (220, 130, 20, 100)]}
for road, rect in road_dict.items():
    for rectangle in rect:
        pygame.draw.rect(lcd, GRAY, rectangle)
pygame.display.flip()
try:
    code_run = True
    m1 = Motor(20, 19, 13)
    m2 = Motor(5, 6, 26)
    waiting_counter = 0
    # Main loop where we run the face recognition
    current_id = ""
    wrong_input = False
    # Start with no access so the button callbacks are safe before the first recognition
    person_name = ""
    access = {'R1': False, 'R2': False, 'R3': False, 'R4': False, 'R5': False,
              'R6': False, 'R7': False, 'R8': False, 'R9': False}
    while code_run:
        with open(file_path, 'r') as file:
            # Read the first line (the name written by the face recognition side)
            first_line = file.readline().strip()
        # Check if the first line is not empty
        waiting_counter = waiting_counter + 1
        print(waiting_counter)
        print(first_line)
        print(current_id)
        print(wrong_input)
        if wrong_input and (first_line != ""):
            first_line = ""
            wrong_input = False
            with open('/home/pi/FaceRecognition/current_id.txt', 'w') as f:
                pass  # truncate the file
        if waiting_counter == 25:
            # wrong_input = True
            # No new face for a while: revoke access and return to the waiting screen
            access = {'R1': False, 'R2': False, 'R3': False, 'R4': False, 'R5': False,
                      'R6': False, 'R7': False, 'R8': False, 'R9': False}
            current_id = ""
            person_name = ""
            with open('/home/pi/FaceRecognition/current_id.txt', 'w') as f:
                pass  # truncate the file
            lcd.fill(BLACK)
            font = pygame.font.Font(None, 30)
            text_surface = font.render("Waiting...", True, WHITE)
            text_x = (width - text_surface.get_width()) // 2
            lcd.blit(text_surface, (text_x, 10))
            for room, rect in rooms.items():
                for rectangle in rect:
                    pygame.draw.rect(lcd, GRAY, rectangle)
            road_dict = {'Road': [(10, 110, 230, 20), (60, 130, 20, 100), (150, 40, 20, 70), (220, 130, 20, 100)]}
            for road, rect in road_dict.items():
                for rectangle in rect:
                    pygame.draw.rect(lcd, GRAY, rectangle)
            pygame.display.flip()
        elif (first_line == "David") and (current_id != "David"):
            count1 = 0
            count2 = 0
            waiting_counter = 0
            print("Welcome ", first_line)
            person_name = "David"
            access = {'R1': True, 'R2': False, 'R3': False, 'R4': True, 'R5': False,
                      'R6': True, 'R7': False, 'R8': True, 'R9': False}
            draw_map(access)
            print("Front door open")
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(23, GPIO.OUT)
            GPIO.output(23, 1)  # energize the relay driving the solenoid lock
            time.sleep(2)
            GPIO.output(23, 0)
            print("Front door closed")
            current_id = "David"
        elif (first_line == "Atahan") and (current_id != "Atahan"):
            count1 = 0
            count2 = 0
            waiting_counter = 0
            print("Welcome ", first_line)
            person_name = "Atahan"
            access = {'R1': True, 'R2': True, 'R3': True, 'R4': False, 'R5': True,
                      'R6': False, 'R7': True, 'R8': True, 'R9': True}
            draw_map(access)
            print("Front door open")
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(23, GPIO.OUT)
            GPIO.output(23, 1)
            time.sleep(5)
            GPIO.output(23, 0)
            print("Front door closed")
            current_id = "Atahan"
        elif (first_line == "Evan") and (current_id != "Evan"):
            count1 = 0
            count2 = 0
            waiting_counter = 0
            print("Welcome ", first_line)
            person_name = "Evan"
            access = {'R1': True, 'R2': True, 'R3': True, 'R4': False, 'R5': True,
                      'R6': False, 'R7': True, 'R8': True, 'R9': True}
            draw_map(access)
            print("Front door open")
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(23, GPIO.OUT)
            GPIO.output(23, 1)
            time.sleep(5)
            GPIO.output(23, 0)
            print("Front door closed")
            current_id = "Evan"
        # Sleep for a while before reading again
        time.sleep(1)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
    # Example of updating the map with new data
    # In practice, this would be triggered by new data from the facial recognition system
    # draw_map(new_access_data)
except KeyboardInterrupt:
    with open('/home/pi/FaceRecognition/current_id.txt', 'w') as f:
        pass  # clear any pending name
    print("Program exited by the user.")
finally:
    GPIO.cleanup()
    pygame.quit()
# The map will be gray at first while the face recognition is running
# When the new information comes in using the same format in 'rooms', the map will be updated